__asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
- if ( unlikely(!(regs->xcs & 3)) )
- goto fault_in_hypervisor;
-
if ( unlikely(addr > PAGE_OFFSET) )
goto fault_in_xen_space;
bounce_fault:
+ if ( unlikely(!(regs->xcs & 3)) )
+ goto fault_in_hypervisor;
+
ti = p->thread.traps + 14;
gtb->flags = GTBF_TRAP_CR2; /* page fault pushes %cr2 */
gtb->cr2 = addr;
gtb->eip = ti->address;
return;
-
+ /*
+ * FAULT IN XEN ADDRESS SPACE:
+ * We only deal with one kind -- a fault in the shadow LDT mapping.
+ * If this occurs we pull a mapping from the guest's LDT, if it is
+ * valid. Otherwise we send the fault up to the guest OS to be handled.
+ */
fault_in_xen_space:
if ( (addr < LDT_VIRT_START) ||
page->flags |= PGT_ldt_page;
}
+ /* Success! */
get_page_type(page);
get_page_tot(page);
p->mm.perdomain_pt[l1_table_offset(off)+16] = mk_l1_pgentry(l1e|_PAGE_RW);
+ p->mm.shadow_ldt_mapcnt++;
spin_unlock(&p->page_lock);
return;
spinlock_t free_list_lock = SPIN_LOCK_UNLOCKED;
unsigned int free_pfns;
-static int tlb_flush[NR_CPUS];
+/* Used to defer flushing of memory structures. */
+static int flush_tlb[NR_CPUS] __cacheline_aligned;
+
/*
* init_frametable:
unsigned long page_index;
unsigned long flags;
- memset(tlb_flush, 0, sizeof(tlb_flush));
+ memset(flush_tlb, 0, sizeof(flush_tlb));
max_page = nr_pages;
frame_table_size = nr_pages * sizeof(struct pfn_info);
}
+/*
+ * Tear down all shadow LDT mappings for the current domain.
+ * Clears the per-domain page-table slots used for the shadow LDT
+ * (entries 16..31, matching the `+16` offset used when the mapping is
+ * installed in the page-fault path) and drops the type/total reference
+ * counts that were taken when each mapping was created.  Also resets
+ * shadow_ldt_mapcnt to zero.
+ * NOTE(review): callers are responsible for the TLB flush — the call
+ * sites in this patch set flush_tlb[] themselves; confirm all future
+ * callers do the same.
+ */
+static void __invalidate_shadow_ldt(void)
+{
+    int i;
+    unsigned long pfn;
+    struct pfn_info *page;
+
+    /* No live shadow LDT mappings remain once we are done. */
+    current->mm.shadow_ldt_mapcnt = 0;
+
+    /* Slots 16..31 of the per-domain page table hold the shadow LDT. */
+    for ( i = 16; i < 32; i++ )
+    {
+        pfn = l1_pgentry_to_pagenr(current->mm.perdomain_pt[i]);
+        if ( pfn == 0 ) continue; /* slot not populated -- nothing to drop */
+        current->mm.perdomain_pt[i] = mk_l1_pgentry(0);
+        page = frame_table + pfn;
+        /* Sanity: the frame must be an LDT page owned by this domain,
+         * with the refcounts we are about to release still held. */
+        ASSERT((page->flags & PG_type_mask) == PGT_ldt_page);
+        ASSERT((page->flags & PG_domain_mask) == current->domain);
+        ASSERT((page->type_count != 0) && (page->tot_count != 0));
+        /* Release the references taken when the mapping was installed. */
+        put_page_type(page);
+        put_page_tot(page);
+    }
+}
+/*
+ * Fast-path wrapper: skip the full shadow-LDT teardown entirely when no
+ * mappings are live (shadow_ldt_mapcnt == 0), which is the common case.
+ */
+static inline void invalidate_shadow_ldt(void)
+{
+    if ( current->mm.shadow_ldt_mapcnt != 0 )
+        __invalidate_shadow_ldt();
+}
+
+
/* Return original refcnt, or -1 on error. */
static int inc_page_refcnt(unsigned long page_nr, unsigned int type)
{
return get_page_type(page);
}
+
/* Return new refcnt, or -1 on error. */
static int dec_page_refcnt(unsigned long page_nr, unsigned int type)
{
return ret;
}
+
static int get_l1_table(unsigned long page_nr)
{
l1_pgentry_t *p_l1_entry, l1_entry;
return ret;
}
+
static int get_page(unsigned long page_nr, int writeable)
{
struct pfn_info *page;
return(0);
}
+
static void put_l2_table(unsigned long page_nr)
{
l2_pgentry_t *p_l2_entry, l2_entry;
unmap_domain_mem(p_l2_entry);
}
+
static void put_l1_table(unsigned long page_nr)
{
l1_pgentry_t *p_l1_entry, l1_entry;
unmap_domain_mem(p_l1_entry-1);
}
+
static void put_page(unsigned long page_nr, int writeable)
{
struct pfn_info *page;
((page_type_count(page) != 0) &&
((page->flags & PG_type_mask) == PGT_writeable_page) &&
((page->flags & PG_need_flush) == PG_need_flush)));
- if ( writeable && (put_page_type(page) == 0) )
+ if ( writeable )
+ {
+ if ( put_page_type(page) == 0 )
+ {
+ flush_tlb[smp_processor_id()] = 1;
+ page->flags &= ~PG_need_flush;
+ }
+ }
+ else if ( unlikely(((page->flags & PG_type_mask) == PGT_ldt_page) &&
+ (page_type_count(page) != 0)) )
{
- tlb_flush[smp_processor_id()] = 1;
- page->flags &= ~PG_need_flush;
+ /* We expect this is rare so we just blow the entire shadow LDT. */
+ invalidate_shadow_ldt();
}
put_page_tot(page);
}
{
put_l2_table(pagetable_val(current->mm.pagetable) >> PAGE_SHIFT);
current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
+ invalidate_shadow_ldt();
+ flush_tlb[smp_processor_id()] = 1;
}
else
{
MEM_LOG("Error while installing new baseptr %08lx %d", ptr, err);
}
- /* fall through */
+ break;
case PGEXT_TLB_FLUSH:
- tlb_flush[smp_processor_id()] = 1;
+ flush_tlb[smp_processor_id()] = 1;
break;
case PGEXT_INVLPG:
case PGEXT_SET_LDT:
{
- int i;
unsigned long ents = val >> PGEXT_CMD_SHIFT;
if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
(ents > 8192) ||
{
if ( current->mm.ldt_ents != 0 )
{
- /* Tear down the old LDT. */
- for ( i = 16; i < 32; i++ )
- {
- pfn = l1_pgentry_to_pagenr(current->mm.perdomain_pt[i]);
- if ( pfn == 0 ) continue;
- current->mm.perdomain_pt[i] = mk_l1_pgentry(0);
- page = frame_table + pfn;
- ASSERT((page->flags & PG_type_mask) == PGT_ldt_page);
- ASSERT((page->flags & PG_domain_mask) == current->domain);
- ASSERT((page->type_count != 0) && (page->tot_count != 0));
- put_page_type(page);
- put_page_tot(page);
- }
- tlb_flush[smp_processor_id()] = 1;
+ invalidate_shadow_ldt();
+ flush_tlb[smp_processor_id()] = 1;
}
current->mm.ldt_base = ptr;
current->mm.ldt_ents = ents;
return err;
}
+
int do_process_page_updates(page_update_request_t *ureqs, int count)
{
page_update_request_t req;
ureqs++;
}
- if ( tlb_flush[smp_processor_id()] )
+ if ( flush_tlb[smp_processor_id()] )
{
- tlb_flush[smp_processor_id()] = 0;
+ flush_tlb[smp_processor_id()] = 0;
__write_cr3_counted(pagetable_val(current->mm.pagetable));
}